EXPORT_SYMBOL(enable_hlt);
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
+/* XXX XEN doesn't use default_idle() or poll_idle(); use xen_idle() instead. */
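+/*
+ * set_timeout_timer() lives in the Xen time code (declared extern below); it
+ * is expected to arm a one-shot hypervisor timer for the next pending timer
+ * event, returning 0 on success, so that blocking in xen_idle() always has a
+ * guaranteed wakeup.
+ */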
+extern int set_timeout_timer(void);
+void xen_idle(void)
{
- if (!hlt_counter && current_cpu_data.hlt_works_ok) {
- local_irq_disable();
- if (!need_resched())
- safe_halt();
- else
- local_irq_enable();
- }
-}
+ int cpu = smp_processor_id();
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle (void)
-{
- int oldval;
+ local_irq_disable();
- local_irq_enable();
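+ /* Flush pending RCU work before blocking, so grace periods don't stall while we sleep. */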
+ if (rcu_pending(cpu))
+ rcu_check_callbacks(cpu, 0);
- /*
- * Deal with another CPU just having chosen a thread to
- * run here:
- */
- oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-
- if (!oldval) {
- set_thread_flag(TIF_POLLING_NRFLAG);
- asm volatile(
- "2:"
- "testl %0, %1;"
- "rep; nop;"
- "je 2b;"
- : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
-
- clear_thread_flag(TIF_POLLING_NRFLAG);
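+ /*
+ * Three cases: work is already pending -> return with events enabled;
+ * a wakeup timer could be armed -> block in the hypervisor; otherwise ->
+ * yield the VCPU rather than risk sleeping with no wakeup.
+ */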
+ if (need_resched()) {
+ local_irq_enable();
+ } else if (set_timeout_timer() == 0) {
+ /* NB. Blocking re-enables event delivery in a race-free manner. */
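+ /*
+ * Event delivery is still disabled at this point; the block hypercall
+ * re-enables it and checks for pending events atomically, so a wakeup
+ * arriving after the need_resched() test above cannot be lost.
+ */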
+ HYPERVISOR_block();
} else {
- set_need_resched();
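+ /* No wakeup timer could be armed, so don't block; re-enable events and yield the VCPU instead. */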
+ local_irq_enable();
+ HYPERVISOR_yield();
}
}
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched()) {
- void (*idle)(void);
/*
* Mark this as an RCU critical section so that
* synchronize_kernel() in the unload path waits
* for our completion.
*/
rcu_read_lock();
- idle = pm_idle;
-
- if (!idle)
- idle = default_idle;
-
irq_stat[smp_processor_id()].idle_timestamp = jiffies;
- idle();
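+ /* pm_idle overrides no longer apply; the Xen-aware idle routine is always used. */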
+ xen_idle();
rcu_read_unlock();
}
schedule();
}
}
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- */
-static void mwait_idle(void)
-{
- local_irq_enable();
-
- if (!need_resched()) {
- set_thread_flag(TIF_POLLING_NRFLAG);
- do {
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- if (need_resched())
- break;
- __mwait(0, 0);
- } while (!need_resched());
- clear_thread_flag(TIF_POLLING_NRFLAG);
- }
-}
-
-void __init select_idle_routine(const struct cpuinfo_x86 *c)
-{
- if (cpu_has(c, X86_FEATURE_MWAIT)) {
- printk("monitor/mwait feature present.\n");
- /*
- * Skip, if setup has overridden idle.
- * One CPU supports mwait => All CPUs supports mwait
- */
- if (!pm_idle) {
- printk("using mwait in idle threads.\n");
- pm_idle = mwait_idle;
- }
- }
-}
-
-static int __init idle_setup (char *str)
-{
- if (!strncmp(str, "poll", 4)) {
- printk("using polling idle threads.\n");
- pm_idle = poll_idle;
-#ifdef CONFIG_X86_SMP
- if (smp_num_siblings > 1)
- printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
-#endif
- } else if (!strncmp(str, "halt", 4)) {
- printk("using halt in idle threads.\n");
- pm_idle = default_idle;
- }
-
- return 1;
-}
-
-__setup("idle=", idle_setup);
+/* XXX XEN doesn't use mwait_idle(), select_idle_routine() or idle_setup(); */
+/* xen_idle() is always used instead. */
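+/* The empty stub is kept so that existing callers elsewhere in the i386 setup code still compile and link. */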
+void __init select_idle_routine(const struct cpuinfo_x86 *c) {}
void show_regs(struct pt_regs * regs)
{